Allow shadow page-table code to do unsafe things (e.g., take faults, make user-space accesses) with shadow locks held, by taking those locks via new spin_lock_nochecking() wrappers that bypass critical-region checking.
domain_pause(d);
synchronise_pagetables(~0UL);
- spin_lock(&d->mm.shadow_lock);
+ shadow_lock(&d->mm);
if ( cmd == DOM0_SHADOW_CONTROL_OP_OFF )
{
rc = -EINVAL;
}
- spin_unlock(&d->mm.shadow_lock);
+ shadow_unlock(&d->mm);
domain_unpause(d);
// take the lock and reread gpte
- spin_lock(&current->mm.shadow_lock);
+ shadow_lock(m);
if ( unlikely(__get_user(gpte, (unsigned long*)&linear_pg_table[va>>PAGE_SHIFT])) )
{
SH_VVLOG("shadow_fault - EXIT: read gpte faulted" );
- spin_unlock(&m->shadow_lock);
+ shadow_unlock(m);
return 0; // propagate to guest
}
if ( unlikely(!(gpte & _PAGE_PRESENT)) )
{
SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte );
- spin_unlock(&m->shadow_lock);
+ shadow_unlock(m);
return 0; // we're not going to be able to help
}
else
{ // write fault on RO page
SH_VVLOG("shadow_fault - EXIT: write fault on RO page (%lx)",gpte );
- spin_unlock(&m->shadow_lock);
+ shadow_unlock(m);
return 0; // propagate to guest
// not clear whether we should set accessed bit here...
}
check_pagetable( current, current->mm.pagetable, "post-sf" );
- spin_unlock(&m->shadow_lock);
+ shadow_unlock(m);
return 1; // let's try the faulting instruction again...
#ifndef NDEBUG
-static int crit_count[NR_CPUS];
-static int crit_checking = 1;
+static int crit_count[NR_CPUS], crit_checking_disabled[NR_CPUS];
void disable_criticalregion_checking(void)
{
- crit_checking = 0;
+ int cpu = smp_processor_id();
+ ASSERT(crit_checking_disabled[cpu] >= 0);
+ crit_checking_disabled[cpu]++;
+}
+
+void enable_criticalregion_checking(void)
+{
+ int cpu = smp_processor_id();
+ crit_checking_disabled[cpu]--;
+ ASSERT(crit_checking_disabled[cpu] >= 0);
}
void criticalregion_enter(void)
void ASSERT_no_criticalregion(void)
{
int cpu = smp_processor_id();
- if ( (crit_count[cpu] == 0) || !crit_checking )
+ if ( (crit_count[cpu] == 0) || crit_checking_disabled[cpu] )
return;
disable_criticalregion_checking();
ASSERT(crit_count[cpu] >= 0); /* -ve count is a special kind of bogus! */
#define shadow_mode(_d) ((_d)->mm.shadow_mode)
#define shadow_lock_init(_d) spin_lock_init(&(_d)->mm.shadow_lock)
+#define shadow_lock(_m) spin_lock_nochecking(&(_m)->shadow_lock)
+#define shadow_unlock(_m) spin_unlock_nochecking(&(_m)->shadow_lock)
extern void shadow_mode_init(void);
extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
int rc;
ASSERT(local_irq_is_enabled());
//if(spin_is_locked(&m->shadow_lock)) printk("+");
- spin_lock(&m->shadow_lock);
+ shadow_lock(m);
rc = __mark_dirty( m, mfn );
- spin_unlock(&m->shadow_lock);
+ shadow_unlock(m);
return rc;
}
ASSERT(local_irq_is_enabled());
//if(spin_is_locked(&m->shadow_lock)) printk("*");
- spin_lock(&m->shadow_lock);
+ shadow_lock(m);
if( m->shadow_mode == SHM_logdirty )
__mark_dirty( m, gpfn );
res = __shadow_status( m, gpfn );
- if (!res) spin_unlock(&m->shadow_lock);
+ if (!res)
+ shadow_unlock(m);
return res;
}
static inline void put_shadow_status( struct mm_struct *m )
{
- spin_unlock(&m->shadow_lock);
+ shadow_unlock(m);
}
if ( unlikely(mm->shadow_mode) )
{
ASSERT(local_irq_is_enabled());
- spin_lock(&mm->shadow_lock);
-
- __shadow_mk_pagetable( mm );
-
- spin_unlock(&mm->shadow_lock);
+ shadow_lock(mm);
+ __shadow_mk_pagetable(mm);
+ shadow_unlock(mm);
}
SH_VVLOG("leaving shadow_mk_pagetable( gptbase=%08lx, mode=%d ) sh=%08lx",
extern void criticalregion_exit(void);
extern void ASSERT_no_criticalregion(void);
extern void disable_criticalregion_checking(void);
+extern void enable_criticalregion_checking(void);
#define spin_lock(_lock) \
do { criticalregion_enter(); _raw_spin_lock(_lock); } while (0)
#define ASSERT_no_criticalregion() ((void)0)
#define disable_criticalregion_checking() ((void)0)
+#define enable_criticalregion_checking() ((void)0)
#define spin_lock(_lock) _raw_spin_lock(_lock)
#define spin_trylock(_lock) _raw_spin_trylock(_lock)
#endif
+/*
+ * Use these if you have taken special care to ensure that certain unsafe
+ * things can occur in your critical region (e.g., faults, user-space
+ * accesses).
+ */
+#define spin_lock_nochecking(_lock) _raw_spin_lock(_lock)
+#define spin_trylock_nochecking(_lock) _raw_spin_trylock(_lock)
+#define spin_unlock_nochecking(_lock) _raw_spin_unlock(_lock)
+
#endif /* __SPINLOCK_H__ */